author    Liam <byteslice@airmail.cc>  2023-12-26 05:21:08 +0100
committer Liam <byteslice@airmail.cc>  2023-12-26 05:30:56 +0100
commit    ddda76f9b0d16e8a6fbc92db9e26f25843b647ed
tree      3fd209d66b8503ca7f0cf6d15c5c065179c66076
parent    Merge pull request #12394 from liamwhite/per-process-memory
-rw-r--r--  src/common/CMakeLists.txt                    |   2
-rw-r--r--  src/common/heap_tracker.cpp                  | 263
-rw-r--r--  src/common/heap_tracker.h                    |  97
-rw-r--r--  src/common/host_memory.cpp                   |  10
-rw-r--r--  src/common/host_memory.h                     |  11
-rw-r--r--  src/core/CMakeLists.txt                      |   1
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.cpp       |  49
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.h         |  20
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp    |   5
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp    |   5
-rw-r--r--  src/core/hle/kernel/k_page_table_base.cpp    |  26
-rw-r--r--  src/core/hle/kernel/k_page_table_base.h      |   3
-rw-r--r--  src/core/hle/kernel/k_process.cpp            |   6
-rw-r--r--  src/core/memory.cpp                          |  86
-rw-r--r--  src/core/memory.h                            |   7
-rw-r--r--  src/tests/common/host_memory.cpp             |  99
16 files changed, 597 insertions, 93 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index b58a7073f..8c57d47c6 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -64,6 +64,8 @@ add_library(common STATIC
fs/path_util.cpp
fs/path_util.h
hash.h
+ heap_tracker.cpp
+ heap_tracker.h
hex_util.cpp
hex_util.h
host_memory.cpp
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..95dc8aa1e
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,263 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <algorithm>
+#include <vector>
+
+#include "common/heap_tracker.h"
+#include "common/logging/log.h"
+
+namespace Common {
+
+namespace {
+
+constexpr s64 MaxResidentMapCount = 0x8000;
+
+} // namespace
+
+HeapTracker::HeapTracker(Common::HostMemory& buffer) : m_buffer(buffer) {}
+HeapTracker::~HeapTracker() = default;
+
+void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
+ MemoryPermission perm, bool is_separate_heap) {
+ // When mapping other memory, map pages immediately.
+ if (!is_separate_heap) {
+ m_buffer.Map(virtual_offset, host_offset, length, perm, false);
+ return;
+ }
+
+ {
+ // We are mapping part of a separate heap.
+ std::scoped_lock lk{m_lock};
+
+ auto* const map = new SeparateHeapMap{
+ .vaddr = virtual_offset,
+ .paddr = host_offset,
+ .size = length,
+ .tick = m_tick++,
+ .perm = perm,
+ .is_resident = false,
+ };
+
+ // Insert into mappings.
+ m_map_count++;
+ m_mappings.insert(*map);
+ }
+
+ // Finally, map.
+ this->DeferredMapSeparateHeap(virtual_offset);
+}
+
+void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
+ // If this is a separate heap...
+ if (is_separate_heap) {
+ std::scoped_lock lk{m_lock};
+
+ const SeparateHeapMap key{
+ .vaddr = virtual_offset,
+ };
+
+ // Split at the boundaries of the region we are removing.
+ this->SplitHeapMapLocked(virtual_offset);
+ this->SplitHeapMapLocked(virtual_offset + size);
+
+ // Erase all mappings in range.
+ auto it = m_mappings.find(key);
+ while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
+ // Get underlying item.
+ auto* const item = std::addressof(*it);
+
+ // If resident, erase from resident map.
+ if (item->is_resident) {
+ ASSERT(--m_resident_map_count >= 0);
+ m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
+ }
+
+ // Erase from map.
+ it = m_mappings.erase(it);
+ ASSERT(--m_map_count >= 0);
+
+ // Free the item.
+ delete item;
+ }
+ }
+
+ // Unmap pages.
+ m_buffer.Unmap(virtual_offset, size, false);
+}
+
+void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
+ // Ensure no rebuild occurs while reprotecting.
+ std::shared_lock lk{m_rebuild_lock};
+
+ // Split at the boundaries of the region we are reprotecting.
+ this->SplitHeapMap(virtual_offset, size);
+
+ // Declare tracking variables.
+ VAddr cur = virtual_offset;
+ VAddr end = virtual_offset + size;
+
+ while (cur < end) {
+ VAddr next = cur;
+ bool should_protect = false;
+
+ {
+ std::scoped_lock lk2{m_lock};
+
+ const SeparateHeapMap key{
+ .vaddr = next,
+ };
+
+ // Try to get the next mapping corresponding to this address.
+ const auto it = m_mappings.nfind(key);
+
+ if (it == m_mappings.end()) {
+ // There are no separate heap mappings remaining.
+ next = end;
+ should_protect = true;
+ } else if (it->vaddr == cur) {
+ // We are in range.
+ // Update permission bits.
+ it->perm = perm;
+
+ // Determine next address and whether we should protect.
+ next = cur + it->size;
+ should_protect = it->is_resident;
+ } else /* if (it->vaddr > cur) */ {
+ // We weren't in range, but there is a block coming up that will be.
+ next = it->vaddr;
+ should_protect = true;
+ }
+ }
+
+ // Clamp to end.
+ next = std::min(next, end);
+
+ // Reprotect, if we need to.
+ if (should_protect) {
+ m_buffer.Protect(cur, next - cur, perm);
+ }
+
+ // Advance.
+ cur = next;
+ }
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
+ if (m_buffer.IsInVirtualRange(fault_address)) {
+ return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
+ }
+
+ return false;
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
+ bool rebuild_required = false;
+
+ {
+ std::scoped_lock lk{m_lock};
+
+ // Check to ensure this was a non-resident separate heap mapping.
+ const auto it = this->GetNearestHeapMapLocked(virtual_offset);
+ if (it == m_mappings.end() || it->is_resident) {
+ return false;
+ }
+
+ // Update tick before possible rebuild.
+ it->tick = m_tick++;
+
+ // Check if we need to rebuild.
+ if (m_resident_map_count > MaxResidentMapCount) {
+ rebuild_required = true;
+ }
+
+ // Map the area.
+ m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
+
+ // This map is now resident.
+ it->is_resident = true;
+ m_resident_map_count++;
+ m_resident_mappings.insert(*it);
+ }
+
+ if (rebuild_required) {
+ // A rebuild was required, so perform it now.
+ this->RebuildSeparateHeapAddressSpace();
+ }
+
+ return true;
+}
+
+void HeapTracker::RebuildSeparateHeapAddressSpace() {
+ std::scoped_lock lk{m_rebuild_lock, m_lock};
+
+ ASSERT(!m_resident_mappings.empty());
+
+ // Unmap so we have at least 4 maps available.
+ const size_t desired_count = std::min(m_resident_map_count, MaxResidentMapCount - 4);
+ const size_t evict_count = m_resident_map_count - desired_count;
+ auto it = m_resident_mappings.begin();
+
+ for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
+ // Unmark and unmap.
+ it->is_resident = false;
+ m_buffer.Unmap(it->vaddr, it->size, false);
+
+ // Advance.
+ ASSERT(--m_resident_map_count >= 0);
+ it = m_resident_mappings.erase(it);
+ }
+}
+
+void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
+ std::scoped_lock lk{m_lock};
+
+ this->SplitHeapMapLocked(offset);
+ this->SplitHeapMapLocked(offset + size);
+}
+
+void HeapTracker::SplitHeapMapLocked(VAddr offset) {
+ const auto it = this->GetNearestHeapMapLocked(offset);
+ if (it == m_mappings.end() || it->vaddr == offset) {
+ // Not contained or no split required.
+ return;
+ }
+
+ // Cache the original values.
+ auto* const left = std::addressof(*it);
+ const size_t orig_size = left->size;
+
+ // Adjust the left map.
+ const size_t left_size = offset - left->vaddr;
+ left->size = left_size;
+
+ // Create the new right map.
+ auto* const right = new SeparateHeapMap{
+ .vaddr = left->vaddr + left_size,
+ .paddr = left->paddr + left_size,
+ .size = orig_size - left_size,
+ .tick = left->tick,
+ .perm = left->perm,
+ .is_resident = left->is_resident,
+ };
+
+ // Insert the new right map.
+ m_map_count++;
+ m_mappings.insert(*right);
+
+ // If resident, also insert into resident map.
+ if (right->is_resident) {
+ m_resident_mappings.insert(*right);
+ m_resident_map_count++;
+ }
+}
+
+HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
+ const SeparateHeapMap key{
+ .vaddr = offset,
+ };
+
+ return m_mappings.find(key);
+}
+
+} // namespace Common
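
The eviction policy above is effectively least-recently-faulted: every mapping carries a monotonically increasing tick, DeferredMapSeparateHeap refreshes the tick on each fault, and RebuildSeparateHeapAddressSpace walks the tick-ordered resident tree from its oldest entry. A minimal standalone sketch of that policy, using std::map in place of the intrusive red-black trees (names and values here are illustrative, not from the commit):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>

int main() {
    // tick -> virtual offset, ordered oldest-first like the tick tree above.
    std::map<std::uint64_t, std::size_t> resident;
    std::uint64_t tick = 0;

    // Three faults come in; each records the tick at fault time.
    for (std::size_t offset : {0x1000, 0x2000, 0x3000}) {
        resident.emplace(tick++, offset);
    }

    // "Rebuild": evict least-recently-faulted entries until under the cap
    // (the real cap is MaxResidentMapCount = 0x8000, with 4 spare slots).
    const std::size_t cap = 2;
    while (resident.size() > cap) {
        const auto oldest = resident.begin();
        std::cout << "evict offset 0x" << std::hex << oldest->second << '\n';
        resident.erase(oldest);
    }
}
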
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
new file mode 100644
index 000000000..cc16041d9
--- /dev/null
+++ b/src/common/heap_tracker.h
@@ -0,0 +1,97 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+#include <mutex>
+#include <set>
+#include <shared_mutex>
+
+#include "common/host_memory.h"
+#include "common/intrusive_red_black_tree.h"
+
+namespace Common {
+
+struct SeparateHeapMap {
+ Common::IntrusiveRedBlackTreeNode addr_node{};
+ Common::IntrusiveRedBlackTreeNode tick_node{};
+ VAddr vaddr{};
+ PAddr paddr{};
+ size_t size{};
+ size_t tick{};
+ MemoryPermission perm{};
+ bool is_resident{};
+};
+
+struct SeparateHeapMapAddrComparator {
+ static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+ if (lhs.vaddr < rhs.vaddr) {
+ return -1;
+ } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
+ return 0;
+ } else {
+ return 1;
+ }
+ }
+};
+
+struct SeparateHeapMapTickComparator {
+ static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+ if (lhs.tick < rhs.tick) {
+ return -1;
+ } else if (lhs.tick > rhs.tick) {
+ return 1;
+ } else {
+ return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
+ }
+ }
+};
+
+class HeapTracker {
+public:
+ explicit HeapTracker(Common::HostMemory& buffer);
+ ~HeapTracker();
+
+ void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
+ bool is_separate_heap);
+ void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
+ void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
+ u8* VirtualBasePointer() {
+ return m_buffer.VirtualBasePointer();
+ }
+
+ bool DeferredMapSeparateHeap(u8* fault_address);
+ bool DeferredMapSeparateHeap(size_t virtual_offset);
+
+private:
+ using AddrTreeTraits =
+ Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
+ using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
+
+ using TickTreeTraits =
+ Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
+ using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
+
+ AddrTree m_mappings{};
+ TickTree m_resident_mappings{};
+
+private:
+ void SplitHeapMap(VAddr offset, size_t size);
+ void SplitHeapMapLocked(VAddr offset);
+
+ AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
+
+ void RebuildSeparateHeapAddressSpace();
+
+private:
+ Common::HostMemory& m_buffer;
+
+ std::shared_mutex m_rebuild_lock{};
+ std::mutex m_lock{};
+ s64 m_map_count{};
+ s64 m_resident_map_count{};
+ size_t m_tick{};
+};
+
+} // namespace Common
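
The header gives the whole client surface: Map/Unmap/Protect mirror HostMemory but take the is_separate_heap flag, and the two DeferredMapSeparateHeap overloads are the fault-time entry points. A hedged usage sketch, assuming page-aligned offsets and sizes as the HostMemory asserts require (the arena sizes and addresses are arbitrary, not from the commit):

#include <cstddef>

#include "common/heap_tracker.h"
#include "common/host_memory.h"

void ExampleSeparateHeapUse() {
    Common::HostMemory mem(1ULL << 32 /* backing */, 1ULL << 39 /* virtual */);
    Common::HeapTracker tracker(mem);

    // Records the mapping, then maps it immediately via DeferredMapSeparateHeap.
    tracker.Map(0x5000, 0x8000, 0x1000, Common::MemoryPermission::ReadWrite,
                /*is_separate_heap=*/true);

    // What the SIGSEGV path replays after the page has been evicted by a rebuild.
    tracker.DeferredMapSeparateHeap(std::size_t{0x5000});

    tracker.Protect(0x5000, 0x1000, Common::MemoryPermission::Read);
    tracker.Unmap(0x5000, 0x1000, /*is_separate_heap=*/true);
}
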
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index e540375b8..860c39e6a 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;
HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
- MemoryPermission perms) {
+ MemoryPermission perms, bool separate_heap) {
ASSERT(virtual_offset % PageAlignment == 0);
ASSERT(host_offset % PageAlignment == 0);
ASSERT(length % PageAlignment == 0);
@@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
}
-void HostMemory::Unmap(size_t virtual_offset, size_t length) {
+void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {
ASSERT(virtual_offset % PageAlignment == 0);
ASSERT(length % PageAlignment == 0);
ASSERT(virtual_offset + length <= virtual_size);
@@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {
impl->Unmap(virtual_offset + virtual_base_offset, length);
}
-void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write,
- bool execute) {
+void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
ASSERT(virtual_offset % PageAlignment == 0);
ASSERT(length % PageAlignment == 0);
ASSERT(virtual_offset + length <= virtual_size);
if (length == 0 || !virtual_base || !impl) {
return;
}
+ const bool read = True(perm & MemoryPermission::Read);
+ const bool write = True(perm & MemoryPermission::Write);
+ const bool execute = True(perm & MemoryPermission::Execute);
impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
}
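
The new Protect() overload decomposes a MemoryPermission mask into the booleans the per-platform impl still expects, using the project's enum-flag helper True(). A self-contained sketch of that idiom with local stand-ins (the real MemoryPermission and True() live in yuzu's common headers):

#include <cstdint>

// Local stand-ins only; not the project's definitions.
enum class MemoryPermission : std::uint32_t { Read = 1, Write = 2, Execute = 4 };

constexpr MemoryPermission operator&(MemoryPermission a, MemoryPermission b) {
    return static_cast<MemoryPermission>(static_cast<std::uint32_t>(a) &
                                         static_cast<std::uint32_t>(b));
}
constexpr bool True(MemoryPermission p) {
    return static_cast<std::uint32_t>(p) != 0;
}

static_assert(True(MemoryPermission::Read & MemoryPermission::Read));
static_assert(!True(MemoryPermission::Read & MemoryPermission::Execute));
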
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 747c5850c..72fbb05af 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
@@ -40,11 +40,12 @@ public:
HostMemory(HostMemory&& other) noexcept;
HostMemory& operator=(HostMemory&& other) noexcept;
- void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms);
+ void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms,
+ bool separate_heap);
- void Unmap(size_t virtual_offset, size_t length);
+ void Unmap(size_t virtual_offset, size_t length, bool separate_heap);
- void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false);
+ void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);
void EnableDirectMappedAddress();
@@ -64,6 +65,10 @@ public:
return virtual_base;
}
+ bool IsInVirtualRange(void* address) const noexcept {
+ return address >= virtual_base && address < virtual_base + virtual_size;
+ }
+
private:
size_t backing_size{};
size_t virtual_size{};
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 96ab39cb8..e960edb47 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -978,6 +978,7 @@ endif()
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
target_sources(core PRIVATE
+ arm/dynarmic/arm_dynarmic.cpp
arm/dynarmic/arm_dynarmic.h
arm/dynarmic/arm_dynarmic_64.cpp
arm/dynarmic/arm_dynarmic_64.h
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
new file mode 100644
index 000000000..e6e9fc45b
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -0,0 +1,49 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifdef __linux__
+
+#include "common/signal_chain.h"
+
+#include "core/arm/dynarmic/arm_dynarmic.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/memory.h"
+
+namespace Core {
+
+namespace {
+
+thread_local Core::Memory::Memory* g_current_memory{};
+std::once_flag g_registered{};
+struct sigaction g_old_segv {};
+
+void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
+ if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
+ return;
+ }
+
+ return g_old_segv.sa_sigaction(sig, info, ctx);
+}
+
+} // namespace
+
+ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
+ g_current_memory = std::addressof(process->GetMemory());
+}
+
+ScopedJitExecution::~ScopedJitExecution() {
+ g_current_memory = nullptr;
+}
+
+void ScopedJitExecution::RegisterHandler() {
+ std::call_once(g_registered, [] {
+ struct sigaction sa {};
+ sa.sa_sigaction = &HandleSigSegv;
+ sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
+ });
+}
+
+} // namespace Core
+
+#endif
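
The handler above resolves faults belonging to the separate heap and forwards everything else to the previously installed handler saved by Common::SigAction. Distilled to plain POSIX calls, the chaining pattern looks roughly like this sketch (not the commit's code; unlike the commit, it also covers a prior handler registered without SA_SIGINFO):

#include <csignal>
#include <cstdio>

namespace {

struct sigaction g_old_segv{};

bool TryResolveFault(void* fault_address) {
    // Stand-in for Memory::InvalidateSeparateHeap(); always fails here.
    (void)fault_address;
    return false;
}

void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
    if (TryResolveFault(info->si_addr)) {
        return; // the faulting instruction is retried after the handler returns
    }
    // Chain to whoever was installed before us.
    if (g_old_segv.sa_flags & SA_SIGINFO) {
        g_old_segv.sa_sigaction(sig, info, ctx);
    } else if (g_old_segv.sa_handler == SIG_DFL || g_old_segv.sa_handler == SIG_IGN) {
        signal(sig, SIG_DFL);
        raise(sig);
    } else {
        g_old_segv.sa_handler(sig);
    }
}

} // namespace

int main() {
    struct sigaction sa{};
    sa.sa_sigaction = &HandleSigSegv;
    sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
    sigaction(SIGSEGV, &sa, &g_old_segv);
    std::puts("chained SIGSEGV handler installed");
}
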
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index eef7c3116..53dd18815 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -26,4 +26,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
return static_cast<HaltReason>(hr);
}
+#ifdef __linux__
+
+class ScopedJitExecution {
+public:
+ explicit ScopedJitExecution(Kernel::KProcess* process);
+ ~ScopedJitExecution();
+ static void RegisterHandler();
+};
+
+#else
+
+class ScopedJitExecution {
+public:
+ explicit ScopedJitExecution(Kernel::KProcess* process) {}
+ ~ScopedJitExecution() {}
+ static void RegisterHandler() {}
+};
+
+#endif
+
} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index c78cfd528..36478f722 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -331,11 +331,15 @@ bool ArmDynarmic32::IsInThumbMode() const {
}
HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
+ ScopedJitExecution sj(thread->GetOwnerProcess());
+
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Run());
}
HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
+ ScopedJitExecution sj(thread->GetOwnerProcess());
+
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Step());
}
@@ -377,6 +381,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
m_jit = MakeJit(&page_table_impl);
+ ScopedJitExecution::RegisterHandler();
}
ArmDynarmic32::~ArmDynarmic32() = default;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index f351b13d9..c811c8ad5 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -362,11 +362,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
}
HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
+ ScopedJitExecution sj(thread->GetOwnerProcess());
+
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Run());
}
HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
+ ScopedJitExecution sj(thread->GetOwnerProcess());
+
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Step());
}
@@ -406,6 +410,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
auto& page_table = process->GetPageTable().GetBasePageTable();
auto& page_table_impl = page_table.GetImpl();
m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
+ ScopedJitExecution::RegisterHandler();
}
ArmDynarmic64::~ArmDynarmic64() = default;
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 423289145..8c1549559 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -434,7 +434,7 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool
void KPageTableBase::Finalize() {
auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
if (Settings::IsFastmemEnabled()) {
- m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+ m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
}
};
@@ -5243,7 +5243,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
// Unmap.
R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
cur_pages, 0, false, unmap_properties,
- OperationType::Unmap, true));
+ OperationType::UnmapPhysical, true));
}
// Check if we're done.
@@ -5326,7 +5326,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
// Map the pages.
R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
cur_pg, map_properties,
- OperationType::MapFirstGroup, false));
+ OperationType::MapFirstGroupPhysical, false));
}
}
@@ -5480,7 +5480,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size)
// Unmap.
R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
- unmap_properties, OperationType::Unmap, false));
+ unmap_properties, OperationType::UnmapPhysical, false));
}
// Check if we're done.
@@ -5655,7 +5655,10 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
// or free them to the page list, and so it goes unused (along with page properties).
switch (operation) {
- case OperationType::Unmap: {
+ case OperationType::Unmap:
+ case OperationType::UnmapPhysical: {
+ const bool separate_heap = operation == OperationType::UnmapPhysical;
+
// Ensure that any pages we track are closed on exit.
KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
@@ -5664,7 +5667,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
this->MakePageGroup(pages_to_close, virt_addr, num_pages);
// Unmap.
- m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+ m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap);
R_SUCCEED();
}
@@ -5672,7 +5675,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
ASSERT(virt_addr != 0);
ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
- ConvertToMemoryPermission(properties.perm));
+ ConvertToMemoryPermission(properties.perm), false);
// Open references to pages, if we should.
if (this->IsHeapPhysicalAddress(phys_addr)) {
@@ -5711,16 +5714,19 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
switch (operation) {
case OperationType::MapGroup:
- case OperationType::MapFirstGroup: {
+ case OperationType::MapFirstGroup:
+ case OperationType::MapFirstGroupPhysical: {
+ const bool separate_heap = operation == OperationType::MapFirstGroupPhysical;
+
// We want to maintain a new reference to every page in the group.
- KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
+ KScopedPageGroup spg(page_group, operation == OperationType::MapGroup);
for (const auto& node : page_group) {
const size_t size{node.GetNumPages() * PageSize};
// Map the pages.
m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
- ConvertToMemoryPermission(properties.perm));
+ ConvertToMemoryPermission(properties.perm), separate_heap);
virt_addr += size;
}
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
index 556d230b3..077cafc96 100644
--- a/src/core/hle/kernel/k_page_table_base.h
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -104,6 +104,9 @@ protected:
ChangePermissionsAndRefresh = 5,
ChangePermissionsAndRefreshAndFlush = 6,
Separate = 7,
+
+ MapFirstGroupPhysical = 65000,
+ UnmapPhysical = 65001,
};
static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d6869c228..068e71dff 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1237,8 +1237,10 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
auto& buffer = m_kernel.System().DeviceMemory().buffer;
const auto& code = code_set.CodeSegment();
const auto& patch = code_set.PatchSegment();
- buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true);
- buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true);
+ buffer.Protect(GetInteger(base_addr + code.addr), code.size,
+ Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
+ buffer.Protect(GetInteger(base_addr + patch.addr), patch.size,
+ Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
}
#endif
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index c7eb32c19..8176a41be 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
#include "common/assert.h"
#include "common/atomic_ops.h"
#include "common/common_types.h"
+#include "common/heap_tracker.h"
#include "common/logging/log.h"
#include "common/page_table.h"
#include "common/scope_exit.h"
@@ -52,10 +53,18 @@ struct Memory::Impl {
} else {
current_page_table->fastmem_arena = nullptr;
}
+
+#ifdef __linux__
+ heap_tracker.emplace(system.DeviceMemory().buffer);
+ buffer = std::addressof(*heap_tracker);
+#else
+ buffer = std::addressof(system.DeviceMemory().buffer);
+#endif
}
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target, Common::MemoryPermission perms) {
+ Common::PhysicalAddress target, Common::MemoryPermission perms,
+ bool separate_heap) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -64,19 +73,20 @@ struct Memory::Impl {
Common::PageType::Memory);
if (current_page_table->fastmem_arena) {
- system.DeviceMemory().buffer.Map(GetInteger(base),
- GetInteger(target) - DramMemoryMap::Base, size, perms);
+ buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
+ separate_heap);
}
}
- void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
+ void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ bool separate_heap) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
Common::PageType::Unmapped);
if (current_page_table->fastmem_arena) {
- system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
+ buffer->Unmap(GetInteger(base), size, separate_heap);
}
}
@@ -89,11 +99,6 @@ struct Memory::Impl {
return;
}
- const bool is_r = True(perms & Common::MemoryPermission::Read);
- const bool is_w = True(perms & Common::MemoryPermission::Write);
- const bool is_x =
- True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
-
u64 protect_bytes{};
u64 protect_begin{};
for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
@@ -102,8 +107,7 @@ struct Memory::Impl {
switch (page_type) {
case Common::PageType::RasterizerCachedMemory:
if (protect_bytes > 0) {
- system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
- is_x);
+ buffer->Protect(protect_begin, protect_bytes, perms);
protect_bytes = 0;
}
break;
@@ -116,7 +120,7 @@ struct Memory::Impl {
}
if (protect_bytes > 0) {
- system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
+ buffer->Protect(protect_begin, protect_bytes, perms);
}
}
@@ -486,7 +490,9 @@ struct Memory::Impl {
}
if (current_page_table->fastmem_arena) {
- system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug);
+ const auto perm{debug ? Common::MemoryPermission{}
+ : Common::MemoryPermission::ReadWrite};
+ buffer->Protect(vaddr, size, perm);
}
// Iterate over a contiguous CPU address space, marking/unmarking the region.
@@ -543,9 +549,14 @@ struct Memory::Impl {
}
if (current_page_table->fastmem_arena) {
- const bool is_read_enable =
- !Settings::values.use_reactive_flushing.GetValue() || !cached;
- system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
+ Common::MemoryPermission perm{};
+ if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
+ perm |= Common::MemoryPermission::Read;
+ }
+ if (!cached) {
+ perm |= Common::MemoryPermission::Write;
+ }
+ buffer->Protect(vaddr, size, perm);
}
// Iterate over a contiguous CPU address space, which corresponds to the specified GPU
@@ -856,6 +867,13 @@ struct Memory::Impl {
std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
std::mutex sys_core_guard;
+
+ std::optional<Common::HeapTracker> heap_tracker;
+#ifdef __linux__
+ Common::HeapTracker* buffer{};
+#else
+ Common::HostMemory* buffer{};
+#endif
};
Memory::Memory(Core::System& system_) : system{system_} {
@@ -873,12 +891,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
}
void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target, Common::MemoryPermission perms) {
- impl->MapMemoryRegion(page_table, base, size, target, perms);
+ Common::PhysicalAddress target, Common::MemoryPermission perms,
+ bool separate_heap) {
+ impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
}
-void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
- impl->UnmapRegion(page_table, base, size);
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ bool separate_heap) {
+ impl->UnmapRegion(page_table, base, size, separate_heap);
}
void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -1048,7 +1068,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
}
bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
- bool mapped = true;
+ [[maybe_unused]] bool mapped = true;
+ [[maybe_unused]] bool rasterizer = false;
+
u8* const ptr = impl->GetPointerImpl(
GetInteger(vaddr),
[&] {
@@ -1056,8 +1078,26 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
GetInteger(vaddr));
mapped = false;
},
- [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); });
+ [&] {
+ impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
+ rasterizer = true;
+ });
+
+#ifdef __linux__
+ if (!rasterizer && mapped) {
+ impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
+ }
+#endif
+
return mapped && ptr != nullptr;
}
+bool Memory::InvalidateSeparateHeap(void* fault_address) {
+#ifdef __linux__
+ return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
+#else
+ return false;
+#endif
+}
+
} // namespace Core::Memory
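
On Linux, Memory::Impl now routes every fastmem operation through the HeapTracker, and InvalidateSeparateHeap lets the signal handler ask whether a fault was a lazily unmapped heap page. The u8* overload's pointer-to-offset translation is the whole bridge between si_addr and the tracker; a minimal sketch of that arithmetic, with an ordinary array standing in for the virtual arena:

#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
    // Stand-in for HostMemory's virtual arena and VirtualBasePointer().
    static std::uint8_t arena[0x10000]{};
    std::uint8_t* const virtual_base = arena;
    const std::size_t virtual_size = sizeof(arena);

    // As delivered to the handler in info->si_addr.
    std::uint8_t* const fault_address = virtual_base + 0x5000;

    // IsInVirtualRange() followed by the subtraction in the u8* overload.
    if (fault_address >= virtual_base && fault_address < virtual_base + virtual_size) {
        const std::size_t virtual_offset =
            static_cast<std::size_t>(fault_address - virtual_base);
        std::cout << "deferred-map offset 0x" << std::hex << virtual_offset << '\n';
    }
}
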
diff --git a/src/core/memory.h b/src/core/memory.h
index c1879e78f..3e4d03f57 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -86,7 +86,8 @@ public:
* @param perms The permissions to map the memory with.
*/
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target, Common::MemoryPermission perms);
+ Common::PhysicalAddress target, Common::MemoryPermission perms,
+ bool separate_heap);
/**
* Unmaps a region of the emulated process address space.
@@ -95,7 +96,8 @@ public:
* @param base The address to begin unmapping at.
* @param size The amount of bytes to unmap.
*/
- void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
+ void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ bool separate_heap);
/**
* Protects a region of the emulated process address space with the new permissions.
@@ -486,6 +488,7 @@ public:
void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
+ bool InvalidateSeparateHeap(void* fault_address);
void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
private:
diff --git a/src/tests/common/host_memory.cpp b/src/tests/common/host_memory.cpp
index 1a28e862b..cb040c942 100644
--- a/src/tests/common/host_memory.cpp
+++ b/src/tests/common/host_memory.cpp
@@ -12,6 +12,7 @@ using namespace Common::Literals;
static constexpr size_t VIRTUAL_SIZE = 1ULL << 39;
static constexpr size_t BACKING_SIZE = 4_GiB;
static constexpr auto PERMS = Common::MemoryPermission::ReadWrite;
+static constexpr auto HEAP = false;
TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
{ HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); }
@@ -20,7 +21,7 @@ TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
TEST_CASE("HostMemory: Simple map", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x5000, 0x8000, 0x1000, PERMS);
+ mem.Map(0x5000, 0x8000, 0x1000, PERMS, HEAP);
volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
data[0] = 50;
@@ -29,8 +30,8 @@ TEST_CASE("HostMemory: Simple map", "[common]") {
TEST_CASE("HostMemory: Simple mirror map", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x5000, 0x3000, 0x2000, PERMS);
- mem.Map(0x8000, 0x4000, 0x1000, PERMS);
+ mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
+ mem.Map(0x8000, 0x4000, 0x1000, PERMS, HEAP);
volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000;
volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000;
@@ -40,116 +41,116 @@ TEST_CASE("HostMemory: Simple mirror map", "[common]") {
TEST_CASE("HostMemory: Simple unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x5000, 0x3000, 0x2000, PERMS);
+ mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
data[75] = 50;
REQUIRE(data[75] == 50);
- mem.Unmap(0x5000, 0x2000);
+ mem.Unmap(0x5000, 0x2000, HEAP);
}
TEST_CASE("HostMemory: Simple unmap and remap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x5000, 0x3000, 0x2000, PERMS);
+ mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
data[0] = 50;
REQUIRE(data[0] == 50);
- mem.Unmap(0x5000, 0x2000);
+ mem.Unmap(0x5000, 0x2000, HEAP);
- mem.Map(0x5000, 0x3000, 0x2000, PERMS);
+ mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
REQUIRE(data[0] == 50);
- mem.Map(0x7000, 0x2000, 0x5000, PERMS);
+ mem.Map(0x7000, 0x2000, 0x5000, PERMS, HEAP);
REQUIRE(data[0x3000] == 50);
}
TEST_CASE("HostMemory: Nieche allocation", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x0000, 0, 0x20000, PERMS);
- mem.Unmap(0x0000, 0x4000);
- mem.Map(0x1000, 0, 0x2000, PERMS);
- mem.Map(0x3000, 0, 0x1000, PERMS);
- mem.Map(0, 0, 0x1000, PERMS);
+ mem.Map(0x0000, 0, 0x20000, PERMS, HEAP);
+ mem.Unmap(0x0000, 0x4000, HEAP);
+ mem.Map(0x1000, 0, 0x2000, PERMS, HEAP);
+ mem.Map(0x3000, 0, 0x1000, PERMS, HEAP);
+ mem.Map(0, 0, 0x1000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Full unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x8000, 0, 0x4000, PERMS);
- mem.Unmap(0x8000, 0x4000);
- mem.Map(0x6000, 0, 0x16000, PERMS);
+ mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+ mem.Unmap(0x8000, 0x4000, HEAP);
+ mem.Map(0x6000, 0, 0x16000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x0000, 0, 0x4000, PERMS);
- mem.Unmap(0x2000, 0x4000);
- mem.Map(0x2000, 0x80000, 0x4000, PERMS);
+ mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
+ mem.Unmap(0x2000, 0x4000, HEAP);
+ mem.Map(0x2000, 0x80000, 0x4000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x8000, 0, 0x4000, PERMS);
- mem.Unmap(0x6000, 0x4000);
- mem.Map(0x8000, 0, 0x2000, PERMS);
+ mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+ mem.Unmap(0x6000, 0x4000, HEAP);
+ mem.Map(0x8000, 0, 0x2000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x0000, 0, 0x4000, PERMS);
- mem.Map(0x4000, 0, 0x1b000, PERMS);
- mem.Unmap(0x3000, 0x1c000);
- mem.Map(0x3000, 0, 0x20000, PERMS);
+ mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
+ mem.Map(0x4000, 0, 0x1b000, PERMS, HEAP);
+ mem.Unmap(0x3000, 0x1c000, HEAP);
+ mem.Map(0x3000, 0, 0x20000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Unmap between placeholders", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x0000, 0, 0x4000, PERMS);
- mem.Map(0x4000, 0, 0x4000, PERMS);
- mem.Unmap(0x2000, 0x4000);
- mem.Map(0x2000, 0, 0x4000, PERMS);
+ mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
+ mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
+ mem.Unmap(0x2000, 0x4000, HEAP);
+ mem.Map(0x2000, 0, 0x4000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Unmap to origin", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x4000, 0, 0x4000, PERMS);
- mem.Map(0x8000, 0, 0x4000, PERMS);
- mem.Unmap(0x4000, 0x4000);
- mem.Map(0, 0, 0x4000, PERMS);
- mem.Map(0x4000, 0, 0x4000, PERMS);
+ mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
+ mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+ mem.Unmap(0x4000, 0x4000, HEAP);
+ mem.Map(0, 0, 0x4000, PERMS, HEAP);
+ mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Unmap to right", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x4000, 0, 0x4000, PERMS);
- mem.Map(0x8000, 0, 0x4000, PERMS);
- mem.Unmap(0x8000, 0x4000);
- mem.Map(0x8000, 0, 0x4000, PERMS);
+ mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
+ mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+ mem.Unmap(0x8000, 0x4000, HEAP);
+ mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x4000, 0x10000, 0x4000, PERMS);
+ mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
ptr[0x1000] = 17;
- mem.Unmap(0x6000, 0x2000);
+ mem.Unmap(0x6000, 0x2000, HEAP);
REQUIRE(ptr[0x1000] == 17);
}
TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x4000, 0x10000, 0x4000, PERMS);
+ mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
ptr[0x3000] = 19;
ptr[0x3fff] = 12;
- mem.Unmap(0x4000, 0x2000);
+ mem.Unmap(0x4000, 0x2000, HEAP);
REQUIRE(ptr[0x3000] == 19);
REQUIRE(ptr[0x3fff] == 12);
@@ -157,13 +158,13 @@ TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x4000, 0x10000, 0x4000, PERMS);
+ mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
ptr[0x0000] = 19;
ptr[0x3fff] = 12;
- mem.Unmap(0x1000, 0x2000);
+ mem.Unmap(0x1000, 0x2000, HEAP);
REQUIRE(ptr[0x0000] == 19);
REQUIRE(ptr[0x3fff] == 12);
@@ -171,14 +172,14 @@ TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
- mem.Map(0x4000, 0x10000, 0x2000, PERMS);
- mem.Map(0x6000, 0x20000, 0x2000, PERMS);
+ mem.Map(0x4000, 0x10000, 0x2000, PERMS, HEAP);
+ mem.Map(0x6000, 0x20000, 0x2000, PERMS, HEAP);
volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
ptr[0x0000] = 19;
ptr[0x3fff] = 12;
- mem.Unmap(0x5000, 0x2000);
+ mem.Unmap(0x5000, 0x2000, HEAP);
REQUIRE(ptr[0x0000] == 19);
REQUIRE(ptr[0x3fff] == 12);
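
The updated tests above only exercise the separate_heap == false path (the HEAP constant). A hypothetical companion case for the tracker itself might look like the following sketch; it is not part of this commit, and it assumes common/heap_tracker.h is visible to the test target:

TEST_CASE("HeapTracker: Separate heap map and unmap", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    Common::HeapTracker tracker(mem);

    // Map() records the mapping and immediately makes it resident.
    tracker.Map(0x5000, 0x8000, 0x1000, PERMS, true);

    volatile u8* const data = tracker.VirtualBasePointer() + 0x5000;
    data[0] = 50;
    REQUIRE(data[0] == 50);

    tracker.Unmap(0x5000, 0x1000, true);
}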